This notebook provides the code to generate a bbox-ready dataset for classification tasks. We will use an already-trained public classifier from detectron2 and use its predictions to generate bbox datasets.
The reason for this task is that when certain classifiers are trained on public datasets (like the mask dataset below), and we then want to use the same model to identify other labels, we cannot do so, since the datasets do not contain bounding boxes for those labels.
E.g. the datasets below have mask/no-mask bounding boxes, but in order to train the model to also identify persons, we will use the predictions from a pretrained classifier and use those bounding boxes as the training set.
Copy the authentication JSON for Kaggle in order to download our datasets (one-time activity).
# Mount Google Drive; the Kaggle API token used below is stored there.
from google.colab import drive
drive.mount('/content/drive')
Mounted at /content/drive
''' Link to explain how to download Datasets from kaggle https://www.kaggle.com/general/74235'''
# One-time setup: install the Kaggle CLI and place the API token where it expects it.
!pip install -q kaggle
!mkdir ~/.kaggle
# Copy the personal API token from Drive; the CLI refuses tokens that are world-readable,
# hence the chmod 600.
!cp '/content/drive/My Drive/Kaggle/kaggle.json' ~/.kaggle/
!chmod 600 ~/.kaggle/kaggle.json
# !kaggle datasets list
%%time
# Download the three face-mask datasets from Kaggle, unpack each into dataset/,
# and delete the archives to save disk space.
!kaggle datasets download -d wobotintelligence/face-mask-detection-dataset -p dataset
!unzip dataset/face-mask-detection-dataset.zip -d dataset/face-mask-detection-dataset
!rm dataset/face-mask-detection-dataset.zip
!kaggle datasets download -d ivandanilovich/medical-masks-dataset-images-tfrecords -p dataset
# unzip output is noisy for large archives, so it is sent to /dev/null.
!unzip dataset/medical-masks-dataset-images-tfrecords.zip -d dataset/medical-masks-dataset > /dev/null
!rm dataset/medical-masks-dataset-images-tfrecords.zip
!kaggle datasets download -d abdelaliezzyn/medical-masks -p dataset
!unzip dataset/medical-masks.zip -d dataset/medical-masks > /dev/null
!rm dataset/medical-masks.zip
CPU times: user 2.16 ms, sys: 6.39 ms, total: 8.56 ms Wall time: 311 ms
import torch, torchvision
# Verify the runtime: the detectron2 wheel installed below is built for torch 1.6 + CUDA 10.1.
print(torch.__version__, torch.cuda.is_available())
!gcc --version
# install detectron2: (Colab has CUDA 10.1 + torch 1.6)
assert torch.__version__.startswith("1.6")
!pip install pyyaml==5.1 pycocotools>=2.0.1
# Reinstall cleanly so a previously-installed detectron2 build cannot be mixed in.
!pip uninstall -y detectron2
!pip install detectron2 -f https://dl.fbaipublicfiles.com/detectron2/wheels/cu101/torch1.6/index.html
1.6.0+cu101 True gcc (Ubuntu 7.5.0-3ubuntu1~18.04) 7.5.0 Copyright (C) 2017 Free Software Foundation, Inc. This is free software; see the source for copying conditions. There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. Uninstalling detectron2-0.2.1+cu101: Successfully uninstalled detectron2-0.2.1+cu101 Looking in links: https://dl.fbaipublicfiles.com/detectron2/wheels/cu101/torch1.6/index.html Collecting detectron2 Using cached https://dl.fbaipublicfiles.com/detectron2/wheels/cu101/torch1.6/detectron2-0.2.1%2Bcu101-cp36-cp36m-linux_x86_64.whl Requirement already satisfied: pycocotools>=2.0.1 in /usr/local/lib/python3.6/dist-packages (from detectron2) (2.0.2) Requirement already satisfied: future in /usr/local/lib/python3.6/dist-packages (from detectron2) (0.16.0) Requirement already satisfied: yacs>=0.1.6 in /usr/local/lib/python3.6/dist-packages (from detectron2) (0.1.8) Requirement already satisfied: pydot in /usr/local/lib/python3.6/dist-packages (from detectron2) (1.3.0) Requirement already satisfied: tensorboard in /usr/local/lib/python3.6/dist-packages (from detectron2) (2.3.0) Requirement already satisfied: tabulate in /usr/local/lib/python3.6/dist-packages (from detectron2) (0.8.7) Requirement already satisfied: fvcore>=0.1.1 in /usr/local/lib/python3.6/dist-packages (from detectron2) (0.1.2.post20200926) Requirement already satisfied: cloudpickle in /usr/local/lib/python3.6/dist-packages (from detectron2) (1.3.0) Requirement already satisfied: Pillow>=7.1 in /usr/local/lib/python3.6/dist-packages (from detectron2) (7.2.0) Requirement already satisfied: tqdm>4.29.0 in /usr/local/lib/python3.6/dist-packages (from detectron2) (4.41.1) Requirement already satisfied: termcolor>=1.1 in /usr/local/lib/python3.6/dist-packages (from detectron2) (1.1.0) Requirement already satisfied: matplotlib in /usr/local/lib/python3.6/dist-packages (from detectron2) (3.2.2) Requirement already satisfied: mock in 
/usr/local/lib/python3.6/dist-packages (from detectron2) (4.0.2) Requirement already satisfied: setuptools>=18.0 in /usr/local/lib/python3.6/dist-packages (from pycocotools>=2.0.1->detectron2) (50.3.0) Requirement already satisfied: cython>=0.27.3 in /usr/local/lib/python3.6/dist-packages (from pycocotools>=2.0.1->detectron2) (0.29.21) Requirement already satisfied: PyYAML in /usr/local/lib/python3.6/dist-packages (from yacs>=0.1.6->detectron2) (5.1) Requirement already satisfied: pyparsing>=2.1.4 in /usr/local/lib/python3.6/dist-packages (from pydot->detectron2) (2.4.7) Requirement already satisfied: google-auth-oauthlib<0.5,>=0.4.1 in /usr/local/lib/python3.6/dist-packages (from tensorboard->detectron2) (0.4.1) Requirement already satisfied: markdown>=2.6.8 in /usr/local/lib/python3.6/dist-packages (from tensorboard->detectron2) (3.2.2) Requirement already satisfied: absl-py>=0.4 in /usr/local/lib/python3.6/dist-packages (from tensorboard->detectron2) (0.10.0) Requirement already satisfied: numpy>=1.12.0 in /usr/local/lib/python3.6/dist-packages (from tensorboard->detectron2) (1.18.5) Requirement already satisfied: wheel>=0.26; python_version >= "3" in /usr/local/lib/python3.6/dist-packages (from tensorboard->detectron2) (0.35.1) Requirement already satisfied: six>=1.10.0 in /usr/local/lib/python3.6/dist-packages (from tensorboard->detectron2) (1.15.0) Requirement already satisfied: grpcio>=1.24.3 in /usr/local/lib/python3.6/dist-packages (from tensorboard->detectron2) (1.32.0) Requirement already satisfied: requests<3,>=2.21.0 in /usr/local/lib/python3.6/dist-packages (from tensorboard->detectron2) (2.23.0) Requirement already satisfied: tensorboard-plugin-wit>=1.6.0 in /usr/local/lib/python3.6/dist-packages (from tensorboard->detectron2) (1.7.0) Requirement already satisfied: protobuf>=3.6.0 in /usr/local/lib/python3.6/dist-packages (from tensorboard->detectron2) (3.12.4) Requirement already satisfied: werkzeug>=0.11.15 in /usr/local/lib/python3.6/dist-packages 
(from tensorboard->detectron2) (1.0.1) Requirement already satisfied: google-auth<2,>=1.6.3 in /usr/local/lib/python3.6/dist-packages (from tensorboard->detectron2) (1.17.2) Requirement already satisfied: portalocker in /usr/local/lib/python3.6/dist-packages (from fvcore>=0.1.1->detectron2) (2.0.0) Requirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib->detectron2) (1.2.0) Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.6/dist-packages (from matplotlib->detectron2) (0.10.0) Requirement already satisfied: python-dateutil>=2.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib->detectron2) (2.8.1) Requirement already satisfied: requests-oauthlib>=0.7.0 in /usr/local/lib/python3.6/dist-packages (from google-auth-oauthlib<0.5,>=0.4.1->tensorboard->detectron2) (1.3.0) Requirement already satisfied: importlib-metadata; python_version < "3.8" in /usr/local/lib/python3.6/dist-packages (from markdown>=2.6.8->tensorboard->detectron2) (1.7.0) Requirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.6/dist-packages (from requests<3,>=2.21.0->tensorboard->detectron2) (1.24.3) Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.6/dist-packages (from requests<3,>=2.21.0->tensorboard->detectron2) (2020.6.20) Requirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.6/dist-packages (from requests<3,>=2.21.0->tensorboard->detectron2) (2.10) Requirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.6/dist-packages (from requests<3,>=2.21.0->tensorboard->detectron2) (3.0.4) Requirement already satisfied: cachetools<5.0,>=2.0.0 in /usr/local/lib/python3.6/dist-packages (from google-auth<2,>=1.6.3->tensorboard->detectron2) (4.1.1) Requirement already satisfied: rsa<5,>=3.1.4; python_version >= "3" in /usr/local/lib/python3.6/dist-packages (from google-auth<2,>=1.6.3->tensorboard->detectron2) (4.6) Requirement 
already satisfied: pyasn1-modules>=0.2.1 in /usr/local/lib/python3.6/dist-packages (from google-auth<2,>=1.6.3->tensorboard->detectron2) (0.2.8) Requirement already satisfied: oauthlib>=3.0.0 in /usr/local/lib/python3.6/dist-packages (from requests-oauthlib>=0.7.0->google-auth-oauthlib<0.5,>=0.4.1->tensorboard->detectron2) (3.1.0) Requirement already satisfied: zipp>=0.5 in /usr/local/lib/python3.6/dist-packages (from importlib-metadata; python_version < "3.8"->markdown>=2.6.8->tensorboard->detectron2) (3.1.0) Requirement already satisfied: pyasn1>=0.1.3 in /usr/local/lib/python3.6/dist-packages (from rsa<5,>=3.1.4; python_version >= "3"->google-auth<2,>=1.6.3->tensorboard->detectron2) (0.4.8) Installing collected packages: detectron2 Successfully installed detectron2-0.2.1+cu101
dataset_1 = '/content/dataset/face-mask-detection-dataset/Medical mask/Medical mask/Medical Mask/annotations'
dataset_2 = "/content/dataset/medical-masks-dataset/medical-masks-dataset/labels"
dataset_3 = "/content/dataset/medical-masks/medical_masks/train/annotations"
import os


def _list_images_with_annotations(ann_dir, dir_find, dir_replace, ext_find, ext_replace):
    """Map every annotation file in ``ann_dir`` to its image path; keep only paths that exist.

    Parameters
    ----------
    ann_dir : str
        Directory containing the annotation files.
    dir_find / dir_replace : str
        Substring swap that turns the annotation directory into the image directory.
    ext_find / ext_replace : str
        Substring swap that turns the annotation extension into the image extension.
        Pass the leading dot (e.g. ".xml" -> ".jpg") so a directory name that happens
        to contain the extension text is never mangled.

    Returns
    -------
    list[str]
        Image paths whose file actually exists on disk (annotations without an
        image are silently skipped).
    """
    image_files = []
    for name in os.listdir(ann_dir):
        ann_file = os.path.join(ann_dir, name)
        img_file = ann_file.replace(dir_find, dir_replace).replace(ext_find, ext_replace)
        if os.path.exists(img_file):
            image_files.append(img_file)
    return image_files


# Dataset 1 names annotations "<image name>.json", so stripping ".json" yields the image name.
img_files_1 = _list_images_with_annotations(dataset_1, "annotations", "images", ".json", "")
print("Total image files with annotations:", len(img_files_1))
# Dataset 2: XML labels alongside JPG images.
img_files_2 = _list_images_with_annotations(dataset_2, "labels", "images", ".xml", ".jpg")
print("Total image files with annotations:", len(img_files_2))
# Dataset 3: XML annotations alongside PNG images.
img_files_3 = _list_images_with_annotations(dataset_3, "annotations", "images", ".xml", ".png")
print("Total image files with annotations:", len(img_files_3))
print("=" * 50)
img_files = img_files_1 + img_files_2 + img_files_3
print("Total image files with annotations:", len(img_files))
Total image files with annotations: 4326 Total image files with annotations: 1148 Total image files with annotations: 7782 ================================================== Total image files with annotations: 13256
import detectron2
import torch
from detectron2.utils.logger import setup_logger
# Configure detectron2's logger up front so model-building/download progress is visible.
setup_logger()
# import some common libraries
import numpy as np
import os, json, cv2, random
from google.colab.patches import cv2_imshow  # cv2.imshow replacement that works in Colab
# import some common detectron2 utilities
from detectron2 import model_zoo
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
from detectron2.data import MetadataCatalog, DatasetCatalog
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.modeling import build_model
""" Download pretrained models """
## https://github.com/facebookresearch/detectron2/blob/master/MODEL_ZOO.md
cfg = get_cfg()
# add project-specific config (e.g., TensorMask) here if you're not running a model in detectron2's core library
model_yaml = "COCO-Detection/faster_rcnn_R_50_C4_3x.yaml"
model_yaml = "COCO-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_3x.yaml" # AP 44.3, infer 0.103
cfg.merge_from_file(model_zoo.get_config_file(model_yaml))
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.9 # set threshold for this model
cfg.MODEL.DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(model_yaml)
predictor = DefaultPredictor(cfg)
social_model = build_model(cfg)
_ = DetectionCheckpointer(social_model).load(cfg.MODEL.WEIGHTS)
model_final_2d9806.pkl: 431MB [00:46, 9.34MB/s]
%%time
# Smoke test: run the predictor on a single image and visualize the detections.
#read an image
img = cv2.imread(img_files[0])
#pass to the model
outputs = predictor(img)
# Use `Visualizer` to draw the predictions on the image.
v = Visualizer(img[:, :, ::-1], MetadataCatalog.get(cfg.DATASETS.TRAIN[0]), scale=1.2)
v = v.draw_instance_predictions(outputs["instances"].to("cpu"))
cv2_imshow(v.get_image()[:, :, ::-1])
CPU times: user 2.54 s, sys: 140 ms, total: 2.68 s Wall time: 2.61 s
# outputs['instances'].pred_boxes
import pandas as pd
import time
starttime = time.time()
dataframe_file = "dataset_person.pkl"
data = []
if os.path.exists(dataframe_file):
df = pd.read_pickle(dataframe_file)
data = df.to_dict('records')
for fileno, img_file in enumerate(img_files):
if df[df['filepath'] == img_file].shape[0] > 0:
# print("Already processed:", img_file)
continue
img = cv2.imread(img_file)
outputs = predictor(img)
bbox = outputs['instances'].pred_boxes.tensor.cpu().numpy()
for i, classid in enumerate(outputs['instances'].pred_classes.cpu().numpy()):
if classid != 0:
continue
d = {'filepath': img_file, 'bbox': bbox[i], 'classname': 'person'}
data.append(d)
if fileno % 100 == 0:
df = pd.DataFrame(data)
df.to_pickle(dataframe_file)
timetaken = time.time() - starttime
print("Time Elaped:", timetaken, "Processed:", fileno, "Time per image:", timetaken / fileno, df.shape)
df = pd.DataFrame(data)
df.to_pickle(dataframe_file)
timetaken = time.time() - starttime
print("Time Elaped:", timetaken, "Processed:", fileno, "Time per image:", timetaken / fileno, df.shape)
print("Total unique files:", len(df['filepath'].unique()))
df
Time Elaped: 12.439038276672363 Processed: 200 Time per image: 0.06219519138336182 (419, 3) Time Elaped: 42.34010910987854 Processed: 300 Time per image: 0.14113369703292847 (602, 3) Time Elaped: 71.18359041213989 Processed: 400 Time per image: 0.17795897603034974 (801, 3) Time Elaped: 100.16335439682007 Processed: 500 Time per image: 0.20032670879364015 (967, 3) Time Elaped: 129.84585332870483 Processed: 600 Time per image: 0.2164097555478414 (1172, 3) Time Elaped: 159.7043318748474 Processed: 700 Time per image: 0.2281490455354963 (1427, 3) Time Elaped: 188.93535780906677 Processed: 800 Time per image: 0.23616919726133345 (1641, 3) Time Elaped: 217.5003523826599 Processed: 900 Time per image: 0.24166705820295545 (1856, 3) Time Elaped: 246.50108122825623 Processed: 1000 Time per image: 0.24650108122825623 (2084, 3) Time Elaped: 276.5544638633728 Processed: 1100 Time per image: 0.2514131489667025 (2272, 3) Time Elaped: 306.68603515625 Processed: 1200 Time per image: 0.2555716959635417 (2460, 3) Time Elaped: 335.61204981803894 Processed: 1300 Time per image: 0.25816311524464536 (2653, 3) Time Elaped: 364.2373003959656 Processed: 1400 Time per image: 0.26016950028283253 (2842, 3) Time Elaped: 393.6844389438629 Processed: 1500 Time per image: 0.2624562926292419 (3033, 3) Time Elaped: 422.1290445327759 Processed: 1600 Time per image: 0.2638306528329849 (3210, 3) Time Elaped: 452.2097635269165 Processed: 1700 Time per image: 0.26600574325112736 (3416, 3) Time Elaped: 480.8140985965729 Processed: 1800 Time per image: 0.2671189436647627 (3593, 3) Time Elaped: 509.54856157302856 Processed: 1900 Time per image: 0.26818345345948874 (3825, 3) Time Elaped: 539.1471180915833 Processed: 2000 Time per image: 0.2695735590457916 (4029, 3) Time Elaped: 568.7095277309418 Processed: 2100 Time per image: 0.27081406082425796 (4250, 3) Time Elaped: 596.9898674488068 Processed: 2200 Time per image: 0.27135903065854855 (4457, 3) Time Elaped: 626.1315550804138 Processed: 2300 Time per 
image: 0.27223111090452773 (4679, 3) Time Elaped: 655.9704904556274 Processed: 2400 Time per image: 0.2733210376898448 (4882, 3) Time Elaped: 685.8526122570038 Processed: 2500 Time per image: 0.27434104490280153 (5079, 3) Time Elaped: 714.7918088436127 Processed: 2600 Time per image: 0.2749199264783126 (5294, 3) Time Elaped: 744.3259184360504 Processed: 2700 Time per image: 0.27567626608742607 (5498, 3) Time Elaped: 772.5492107868195 Processed: 2800 Time per image: 0.2759104324238641 (5662, 3) Time Elaped: 802.5320835113525 Processed: 2900 Time per image: 0.27673520121081124 (5908, 3) Time Elaped: 832.1124866008759 Processed: 3000 Time per image: 0.27737082886695863 (6081, 3) Time Elaped: 861.7645058631897 Processed: 3100 Time per image: 0.27798855027844827 (6310, 3) Time Elaped: 890.5924515724182 Processed: 3200 Time per image: 0.2783101411163807 (6516, 3) Time Elaped: 919.8103542327881 Processed: 3300 Time per image: 0.27873041037357216 (6701, 3) Time Elaped: 948.2503044605255 Processed: 3400 Time per image: 0.2788971483707428 (6883, 3) Time Elaped: 977.6111776828766 Processed: 3500 Time per image: 0.27931747933796475 (7076, 3) Time Elaped: 1006.5602385997772 Processed: 3600 Time per image: 0.2796000662777159 (7297, 3) Time Elaped: 1036.3271188735962 Processed: 3700 Time per image: 0.28008841050637734 (7521, 3) Time Elaped: 1065.33251953125 Processed: 3800 Time per image: 0.2803506630345395 (7718, 3) Time Elaped: 1094.0486528873444 Processed: 3900 Time per image: 0.28052529561213957 (7947, 3) Time Elaped: 1124.068472146988 Processed: 4000 Time per image: 0.281017118036747 (8177, 3) Time Elaped: 1153.014943599701 Processed: 4100 Time per image: 0.2812231569755368 (8345, 3) Time Elaped: 1182.4438395500183 Processed: 4200 Time per image: 0.28153424751190914 (8584, 3) Time Elaped: 1210.8423986434937 Processed: 4300 Time per image: 0.2815912554984869 (8783, 3) Time Elaped: 1242.0401253700256 Processed: 4400 Time per image: 0.28228184667500583 (9240, 3) Time Elaped: 
1273.1626152992249 Processed: 4500 Time per image: 0.28292502562204996 (9798, 3) Time Elaped: 1304.4600839614868 Processed: 4600 Time per image: 0.28357827912206235 (10196, 3) Time Elaped: 1335.7647516727448 Processed: 4700 Time per image: 0.28420526631334997 (10660, 3) Time Elaped: 1366.5834493637085 Processed: 4800 Time per image: 0.2847048852841059 (11103, 3) Time Elaped: 1397.24986743927 Processed: 4900 Time per image: 0.2851530341712796 (11495, 3) Time Elaped: 1428.5003271102905 Processed: 5000 Time per image: 0.2857000654220581 (11959, 3) Time Elaped: 1459.370050907135 Processed: 5100 Time per image: 0.28615099037394803 (12429, 3) Time Elaped: 1490.5002658367157 Processed: 5200 Time per image: 0.2866346665070607 (12967, 3) Time Elaped: 1521.326069355011 Processed: 5300 Time per image: 0.2870426545952851 (13435, 3) Time Elaped: 1552.3640222549438 Processed: 5400 Time per image: 0.2874748189361007 (13849, 3) Time Elaped: 1582.9156937599182 Processed: 5500 Time per image: 0.28780285341089423 (14254, 3) Time Elaped: 1612.9937398433685 Processed: 5600 Time per image: 0.28803459640060153 (14531, 3) Time Elaped: 1642.031628370285 Processed: 5700 Time per image: 0.2880757242754886 (14751, 3) Time Elaped: 1671.2753977775574 Processed: 5800 Time per image: 0.288150930651303 (14995, 3) Time Elaped: 1700.1820256710052 Processed: 5900 Time per image: 0.2881664450289839 (15220, 3) Time Elaped: 1728.884595632553 Processed: 6000 Time per image: 0.2881474326054255 (15428, 3) Time Elaped: 1758.710577249527 Processed: 6100 Time per image: 0.28831320938516836 (15641, 3) Time Elaped: 1788.5353860855103 Processed: 6200 Time per image: 0.2884734493686307 (15901, 3) Time Elaped: 1817.4015057086945 Processed: 6300 Time per image: 0.2884764294775706 (16120, 3) Time Elaped: 1846.8465275764465 Processed: 6400 Time per image: 0.2885697699338198 (16401, 3) Time Elaped: 1876.2937142848969 Processed: 6500 Time per image: 0.28866057142844564 (16602, 3) Time Elaped: 1905.8963482379913 
Processed: 6600 Time per image: 0.28877217397545324 (16802, 3) Time Elaped: 1935.7733159065247 Processed: 6700 Time per image: 0.28892139043380966 (17029, 3) Time Elaped: 1964.4480175971985 Processed: 6800 Time per image: 0.2888894143525292 (17219, 3) Time Elaped: 1993.5766332149506 Processed: 6900 Time per image: 0.2889241497412972 (17475, 3) Time Elaped: 2022.9599840641022 Processed: 7000 Time per image: 0.28899428343772887 (17707, 3) Time Elaped: 2052.72513628006 Processed: 7100 Time per image: 0.2891162163774732 (17974, 3) Time Elaped: 2081.55570602417 Processed: 7200 Time per image: 0.2891049591700236 (18184, 3) Time Elaped: 2110.073085308075 Processed: 7300 Time per image: 0.28905110757644864 (18390, 3) Time Elaped: 2138.7212765216827 Processed: 7400 Time per image: 0.2890163887191463 (18638, 3) Time Elaped: 2167.8509633541107 Processed: 7500 Time per image: 0.2890467951138814 (18836, 3) Time Elaped: 2197.6458101272583 Processed: 7600 Time per image: 0.2891639223851656 (19046, 3) Time Elaped: 2226.4469277858734 Processed: 7700 Time per image: 0.28914895166050303 (19251, 3) Time Elaped: 2254.7976269721985 Processed: 7800 Time per image: 0.28907661884258956 (19477, 3) Time Elaped: 2284.074457168579 Processed: 7900 Time per image: 0.2891233490086809 (19695, 3) Time Elaped: 2314.077735900879 Processed: 8000 Time per image: 0.2892597169876099 (19928, 3) Time Elaped: 2342.5881028175354 Processed: 8100 Time per image: 0.2892084077552513 (20123, 3) Time Elaped: 2372.746148109436 Processed: 8200 Time per image: 0.2893592863548093 (20399, 3) Time Elaped: 2401.931491136551 Processed: 8300 Time per image: 0.28938933628151214 (20633, 3) Time Elaped: 2431.394143104553 Processed: 8400 Time per image: 0.289451683702923 (20833, 3) Time Elaped: 2460.2462451457977 Processed: 8500 Time per image: 0.28944073472303505 (21035, 3) Time Elaped: 2488.801407814026 Processed: 8600 Time per image: 0.28939551253651463 (21261, 3) Time Elaped: 2517.862549304962 Processed: 8700 Time per 
image: 0.28940948842585773 (21499, 3) Time Elaped: 2546.557683467865 Processed: 8800 Time per image: 0.2893815549395301 (21708, 3) Time Elaped: 2575.8473868370056 Processed: 8900 Time per image: 0.28942105470078716 (21918, 3) Time Elaped: 2604.756509780884 Processed: 9000 Time per image: 0.28941738997565375 (22111, 3) Time Elaped: 2633.952294111252 Processed: 9100 Time per image: 0.2894453070451925 (22320, 3) Time Elaped: 2662.2786910533905 Processed: 9200 Time per image: 0.28937811859275986 (22537, 3) Time Elaped: 2690.995730161667 Processed: 9300 Time per image: 0.289354379587276 (22766, 3) Time Elaped: 2720.411150455475 Processed: 9400 Time per image: 0.2894054415378165 (22991, 3) Time Elaped: 2749.4169206619263 Processed: 9500 Time per image: 0.2894123074380975 (23202, 3) Time Elaped: 2778.018733024597 Processed: 9600 Time per image: 0.28937695135672886 (23383, 3) Time Elaped: 2808.0951290130615 Processed: 9700 Time per image: 0.28949434319722284 (23623, 3) Time Elaped: 2836.623816013336 Processed: 9800 Time per image: 0.2894514097972792 (23840, 3) Time Elaped: 2866.9033551216125 Processed: 9900 Time per image: 0.28958619748703157 (24072, 3) Time Elaped: 2896.3130910396576 Processed: 10000 Time per image: 0.2896313091039658 (24285, 3) Time Elaped: 2925.524665117264 Processed: 10100 Time per image: 0.28965590743735287 (24580, 3) Time Elaped: 2954.157437324524 Processed: 10200 Time per image: 0.28962327816907096 (24793, 3) Time Elaped: 2982.574969768524 Processed: 10300 Time per image: 0.2895703854144198 (24975, 3) Time Elaped: 3012.3800251483917 Processed: 10400 Time per image: 0.28965192549503765 (25225, 3) Time Elaped: 3041.7287590503693 Processed: 10500 Time per image: 0.28968845324289233 (25469, 3) Time Elaped: 3070.770334482193 Processed: 10600 Time per image: 0.2896953145737918 (25686, 3) Time Elaped: 3100.0478734970093 Processed: 10700 Time per image: 0.2897241003268233 (25881, 3) Time Elaped: 3129.0616779327393 Processed: 10800 Time per image: 
0.2897279331419203 (26106, 3) Time Elaped: 3158.663716316223 Processed: 10900 Time per image: 0.28978566204735995 (26368, 3) Time Elaped: 3187.8100588321686 Processed: 11000 Time per image: 0.28980091443928807 (26575, 3) Time Elaped: 3216.945822954178 Processed: 11100 Time per image: 0.28981493900488087 (26819, 3) Time Elaped: 3245.914274454117 Processed: 11200 Time per image: 0.28981377450483187 (27042, 3) Time Elaped: 3275.271031856537 Processed: 11300 Time per image: 0.289846993969605 (27279, 3) Time Elaped: 3303.8192257881165 Processed: 11400 Time per image: 0.28980870401650144 (27495, 3) Time Elaped: 3332.6618888378143 Processed: 11500 Time per image: 0.2897966859858969 (27741, 3) Time Elaped: 3362.1033115386963 Processed: 11600 Time per image: 0.28983649237402553 (28004, 3) Time Elaped: 3390.7454471588135 Processed: 11700 Time per image: 0.289807303175967 (28207, 3) Time Elaped: 3419.808148622513 Processed: 11800 Time per image: 0.2898142498832638 (28445, 3) Time Elaped: 3448.3009102344513 Processed: 11900 Time per image: 0.2897731857339875 (28624, 3) Time Elaped: 3477.92924952507 Processed: 12000 Time per image: 0.2898274374604225 (28846, 3) Time Elaped: 3506.8459804058075 Processed: 12100 Time per image: 0.28982198185171965 (29101, 3) Time Elaped: 3535.6172318458557 Processed: 12200 Time per image: 0.2898046911349062 (29336, 3) Time Elaped: 3564.5051939487457 Processed: 12300 Time per image: 0.28979717023973545 (29539, 3) Time Elaped: 3593.853472471237 Processed: 12400 Time per image: 0.2898268929412288 (29749, 3) Time Elaped: 3623.7034549713135 Processed: 12500 Time per image: 0.2898962763977051 (30027, 3) Time Elaped: 3652.96448969841 Processed: 12600 Time per image: 0.289917816642731 (30269, 3) Time Elaped: 3682.979502439499 Processed: 12700 Time per image: 0.2899983860188582 (30521, 3) Time Elaped: 3711.786646127701 Processed: 12800 Time per image: 0.28998333172872665 (30739, 3) Time Elaped: 3740.4858016967773 Processed: 12900 Time per image: 
0.28996013966641687 (30940, 3) Time Elaped: 3769.5454392433167 Processed: 13000 Time per image: 0.2899650337879474 (31159, 3) Time Elaped: 3798.2390422821045 Processed: 13100 Time per image: 0.28994191162458816 (31389, 3) Time Elaped: 3827.6699895858765 Processed: 13200 Time per image: 0.28997499921105124 (31610, 3) Time Elaped: 3844.0657062530518 Processed: 13255 Time per image: 0.29000872925334226 (31742, 3) Total unique files: 13077
| filepath | bbox | classname | |
|---|---|---|---|
| 0 | /content/dataset/face-mask-detection-dataset/M... | [335.09497, 372.39194, 547.44525, 1015.1521] | person |
| 1 | /content/dataset/face-mask-detection-dataset/M... | [1127.332, 598.92267, 1495.2758, 977.5641] | person |
| 2 | /content/dataset/face-mask-detection-dataset/M... | [716.7619, 568.66907, 1002.6879, 1007.682] | person |
| 3 | /content/dataset/face-mask-detection-dataset/M... | [685.6409, 405.2225, 853.13635, 663.9724] | person |
| 4 | /content/dataset/face-mask-detection-dataset/M... | [1699.7372, 251.95367, 1968.6167, 961.51874] | person |
| ... | ... | ... | ... |
| 31737 | /content/dataset/medical-masks/medical_masks/t... | [258.9759, 32.17186, 696.0147, 616.2622] | person |
| 31738 | /content/dataset/medical-masks/medical_masks/t... | [668.76984, 26.868143, 1020.0332, 606.88885] | person |
| 31739 | /content/dataset/medical-masks/medical_masks/t... | [0.0, 97.67061, 281.69183, 617.07294] | person |
| 31740 | /content/dataset/medical-masks/medical_masks/t... | [20.156015, 35.309128, 1015.38196, 1077.4785] | person |
| 31741 | /content/dataset/medical-masks/medical_masks/t... | [46.871864, 87.06087, 199.05772, 212.47029] | person |
31742 rows × 3 columns
!cp "dataset_person.pkl" "/content/drive/My Drive/datasets/facemask/dataset_person_bbox.pkl"
from detectron2.structures import BoxMode
# Running image-id counter shared with get_dataset_dict (incremented once per record).
idx = 0
# Closed class list for the pseudo-labeled dataset; category_id indexes into it.
classes = ['person']
def get_dataset_dict(df):
    """Convert one image's worth of bbox rows into a detectron2 dataset record.

    Parameters
    ----------
    df : pandas.DataFrame
        Rows for a single image, with columns 'filepath', 'bbox'
        ([x1, y1, x2, y2] absolute pixel coordinates) and 'classname'.
        All rows are assumed to share one filepath.

    Returns
    -------
    list[dict]
        A single-element list holding the detectron2-format record
        (file_name, image_id, height, width, annotations).
    """
    global idx
    dataset_dicts = []
    record = {}
    # Every row describes the same file, so the first row's path identifies the image.
    filename = df['filepath'].iloc[0]
    # The image is read only to obtain its dimensions, which detectron2 requires.
    height, width = cv2.imread(filename).shape[:2]
    record["file_name"] = filename
    record["image_id"] = idx
    record["height"] = height
    record["width"] = width
    idx += 1
    objs = []
    for _, row in df.iterrows():
        x1, y1, x2, y2 = row['bbox']
        px = [x1, x2]
        py = [y1, y2]
        # Rectangle polygon covering the bbox; detectron2 expects a flat
        # [x0, y0, x1, y1, ...] list per segmentation component.
        # (Fixed: the first corner was previously offset by +0.5 while the
        # other three corners were not, producing a skewed polygon.)
        poly = [(x1, y1), (x2, y1), (x2, y2), (x1, y2)]
        poly = [coord for point in poly for coord in point]
        obj = {
            "bbox": [np.min(px), np.min(py), np.max(px), np.max(py)],
            "bbox_mode": BoxMode.XYXY_ABS,
            "segmentation": [poly],
            "category_id": classes.index(row['classname']),
        }
        objs.append(obj)
    record["annotations"] = objs
    dataset_dicts.append(record)
    return dataset_dicts
# Build detectron2 records for the images covered by the first 2000 rows
# and register the result as a named training dataset.
dataset_dicts_all = []
for filepath in df.head(2000)['filepath'].unique():
    per_image_rows = df[df['filepath'] == filepath]
    dataset_dicts_all.extend(get_dataset_dict(per_image_rows))
meta = "person_2"
train_set_name = meta + "_train"
DatasetCatalog.register(train_set_name, lambda: dataset_dicts_all)
MetadataCatalog.get(train_set_name).set(thing_classes=classes)
train_metadata = MetadataCatalog.get(train_set_name)
# Sanity check: draw the generated annotations on a few randomly chosen images.
for d in random.sample(dataset_dicts_all, 3):
    img = cv2.imread(d["file_name"])
    visualizer = Visualizer(img[:, :, ::-1], metadata=train_metadata, scale=0.5)
    out = visualizer.draw_dataset_dict(d)
    cv2_imshow(out.get_image()[:, :, ::-1])